import pandas as pd
pd.set_option('display.expand_frame_repr', False)
import matplotlib
import matplotlib.pyplot as plt
font = {'size' : 20}
matplotlib.rc('font', **font)
import seaborn as sns
from pylab import rcParams
rcParams["figure.figsize"] = 30,16
from collections import OrderedDict
import datetime as dt
from datetime import date
from datetime import datetime
import sklearn
from sklearn import metrics
from sklearn.model_selection import KFold
from sklearn.preprocessing import MinMaxScaler
import joblib
import numpy as np
np.random.seed(0)
import scipy
import datetime as dt
from datetime import date
import tensorflow
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, LSTM, Activation, InputLayer
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.regularizers import l2
import warnings
warnings.filterwarnings("ignore")
import sys
sys.path.insert(0, "../")
import functions
#Funktion für RMSE erstellen
from keras import backend as K
def root_mean_squared_error(y_true, y_pred):
    """Custom Keras loss: root mean squared error between targets and predictions."""
    squared_errors = K.square(y_pred - y_true)
    return K.sqrt(K.mean(squared_errors))
Im Rahmen dieses Ansatzes wird die LSTM-Schicht durch eine zusätzliche („normale“) Dense-Schicht erweitert. Die Dense-Schicht soll die durch die LSTM-Schicht geschleusten Daten zusätzlich nachverarbeiten. Dafür werden in der LSTM- und Dense-Schicht jeweils 32 Neuronen eingefügt, die mit Tanh aktiviert werden.
#Hyperparameters
epochs = 100
batch_size = 8
window_size = 14

def model_creation():
    """Build and compile the LSTM regression model.

    Architecture: one LSTM layer (32 units, tanh) followed by a Dense hidden
    layer (32 units, tanh) and a single linear output neuron, optimized with
    Adam (lr=0.0005) on the custom RMSE loss.
    """
    net = Sequential()
    net.add(InputLayer(input_shape=(window_size, 4)))
    net.add(LSTM(units=32, return_sequences=False, activation="tanh"))
    net.add(Dense(units=32, activation="tanh"))
    net.add(Dense(units=1))
    optimizer = Adam(learning_rate=0.0005)
    net.compile(loss=root_mean_squared_error, optimizer=optimizer)
    return net
#Load unscaled data for evaluation against ground truth
df_unscaled = pd.read_csv("../3-Data Preparation/data.csv", index_col=0, parse_dates=True)
df_unscaled.index.freq = "D"
#Ground-truth targets on the original scale (test: year 2021; train: rest minus first window)
y_test_true = df_unscaled["verbrauch"]["2021-01-01":]
y_train_true = df_unscaled["verbrauch"][window_size:2192]

#Load scaled data for modelling
df_scaled = pd.read_csv("../3-Data Preparation/data_scaled.csv", index_col=0, parse_dates=True)
df_scaled.index.freq = "D"

#Split into X (features) and y (target).
#.copy() gives X its own buffer so the shifted columns below are written into an
#independent frame rather than a slice of df_scaled (avoids SettingWithCopyWarning,
#which the file-wide warning filter would otherwise silently hide).
X = df_scaled[["verbrauch", "arbeitstag", "temperatur", "tagesstunden"]].copy()
#The exogenous features are shifted one step forward so day t's consumption is
#paired with day t+1's calendar/weather values; hence 2015-01-01 drops out.
exog_cols = ["arbeitstag", "temperatur", "tagesstunden"]
X[exog_cols] = X[exog_cols].shift(-1)
X = X[:2556]
y = df_scaled["verbrauch"]
#Restructure the data into sliding windows
def restructure_data(px, py, window_size):
    """Cut px into overlapping windows of length window_size and pair each
    window with the target value that immediately follows it.

    NOTE(review): the range runs up to len(px) - window_size inclusive, so the
    last target read is py[len(px)] — py must be at least one element longer
    than px (true here, since X drops its final row while y keeps it).
    """
    starts = range(len(px) - window_size + 1)
    windows = [px[s:s + window_size] for s in starts]
    targets = [py[s + window_size] for s in starts]
    return np.array(windows), np.array(targets)
X_windows, y_windows = restructure_data(X, y, window_size)
#Split into training, validation and test data.
#2557 daily targets in total; the last 365 days (2021) form the test set and the
#first window_size targets have no complete preceding window.
split_by = 2557 - 365 - window_size
X_train = X_windows[:split_by]
y_train = y_windows[:split_by]
X_test = X_windows[split_by:]
y_test = y_windows[split_by:]
#Create the cross-validation splitter (5 shuffled folds, fixed seed for reproducibility)
kfold = KFold(n_splits=5, shuffle=True, random_state=0)
#DataFrames collecting the per-epoch history and the per-split error metrics
df_history = pd.DataFrame()
df_history.index.name = "Epoch"
df_metrics = pd.DataFrame(columns=["Testdaten", "Trainingsdaten", "Validierungsdaten"])
df_metrics.index.name = "Split"
iteration = 1
#Scaler fitted on the target during data preparation; used to invert the scaling
scaler_target = joblib.load("../3-Data Preparation/scaler_endog.save")
#Cross-validation: train one freshly initialized model per fold, record its
#loss history and its MAPE on test/train/validation data.
for train_index, validation_index in kfold.split(X_train, y_train):
    print(iteration, "-", end="\t")
    #Build the model
    model = model_creation()
    #Train the model on this fold's training portion, validating on the held-out fold
    history = model.fit(x=X_train[train_index], y=y_train[train_index], epochs=epochs, batch_size=batch_size, validation_data=(X_train[validation_index], y_train[validation_index]), verbose=0)
    #Store per-epoch losses, mapped back to the original consumption scale.
    #NOTE(review): inverse-transforming an RMSE through the MinMax scaler also adds
    #the scaler's offset; 102469 is presumably that minimum being subtracted back
    #out — confirm against scaler_endog.save.
    df_history[str(iteration) + "_train_loss"] = pd.DataFrame(data=(scaler_target.inverse_transform(np.array(history.history["loss"]).reshape(-1, 1)) - 102469)).squeeze()
    df_history[str(iteration) + "_validation_loss"] = pd.DataFrame(data=(scaler_target.inverse_transform(np.array(history.history["val_loss"]).reshape(-1, 1)) - 102469)).squeeze()
    #Predictions for test, training and validation data, inverse-scaled
    preds_test = scaler_target.inverse_transform(model.predict(X_test).reshape(-1, 1))
    preds_train = scaler_target.inverse_transform(model.predict(X_train[train_index]).reshape(-1, 1))
    preds_validation = scaler_target.inverse_transform(model.predict(X_train[validation_index]).reshape(-1, 1))
    #MAPE in percent against the unscaled ground truth, rounded to 2 decimals
    df_metrics.loc[iteration] = [round(metrics.mean_absolute_percentage_error(y_test_true, preds_test) * 100, 2),
                                 round(metrics.mean_absolute_percentage_error(y_train_true[train_index], preds_train) * 100, 2),
                                 round(metrics.mean_absolute_percentage_error(y_train_true[validation_index], preds_validation) * 100, 2)]
    iteration = iteration + 1
#Average the loss histories and the metrics over the five cross-validation splits
for curve in ("train_loss", "validation_loss"):
    running = df_history["1_" + curve]
    for prefix in ("2_", "3_", "4_", "5_"):
        running = running + df_history[prefix + curve]
    df_history[curve] = running / 5
df_metrics.loc["Average"] = [round(df_metrics[col].mean(), 2)
                             for col in ("Testdaten", "Trainingsdaten", "Validierungsdaten")]
1 - 2 - 3 - 4 - 5 -
#Evaluate the training (metrics table and learning curve).
#NOTE(review): the third argument (25000) is presumably an axis/loss limit for
#the learning-curve plot — see functions.py to confirm.
functions.evaluate_training(df_metrics, df_history, 25000)
Testdaten Trainingsdaten Validierungsdaten Split 1 2.48 1.65 1.81 2 2.41 1.74 1.78 3 2.62 1.84 1.90 4 2.29 1.65 1.99 5 2.49 2.01 1.97 Average 2.46 1.78 1.89
Die zusätzliche Dense-Schicht führt beim Training weder zu einer Verbesserung auf den Trainings-, noch auf den Testdaten. Die Lernkurve verhält sich ähnlich wie bei den reinen LSTM-Modellen, allerdings setzt die Konvergenz etwas früher ein. Dies kann aber auch an der geringeren Anzahl von Neuronen liegen.
Beim finalen Test schneidet das Modell mit einem MAPE von 2,74% auf den Testdaten relativ schlecht ab. Auch die Trainingsdaten werden mit einer Abweichung von 1,93% nicht gut erkannt. Es handelt sich also eher um ein unterangepasstes Modell.
#Build the final model (trained on the full training set, no validation split)
model = model_creation()
#Train the final model.
#NOTE(review): use_multiprocessing only applies to keras.utils.Sequence inputs;
#with NumPy arrays it is ignored — confirm and consider removing.
history = model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size, use_multiprocessing=True)
Epoch 1/100 273/273 [==============================] - 2s 4ms/step - loss: 0.1218 Epoch 2/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0663 Epoch 3/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0577 Epoch 4/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0521 Epoch 5/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0492 Epoch 6/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0469 Epoch 7/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0447 Epoch 8/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0434 Epoch 9/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0436 Epoch 10/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0409 Epoch 11/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0414 Epoch 12/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0401 Epoch 13/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0400 Epoch 14/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0393 Epoch 15/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0388 Epoch 16/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0380 Epoch 17/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0376 Epoch 18/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0368 Epoch 19/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0372 Epoch 20/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0374 Epoch 21/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0355 Epoch 22/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0352 Epoch 23/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0355 Epoch 24/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0353 Epoch 25/100 
273/273 [==============================] - 1s 4ms/step - loss: 0.0356 Epoch 26/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0354 Epoch 27/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0344 Epoch 28/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0344 Epoch 29/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0354 Epoch 30/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0337 Epoch 31/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0339 Epoch 32/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0337 Epoch 33/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0338 Epoch 34/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0328 Epoch 35/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0339 Epoch 36/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0333 Epoch 37/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0330 Epoch 38/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0333 Epoch 39/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0320 Epoch 40/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0321 Epoch 41/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0321 Epoch 42/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0330 Epoch 43/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0321 Epoch 44/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0327 Epoch 45/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0328 Epoch 46/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0316 Epoch 47/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0319 Epoch 48/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0316 Epoch 49/100 273/273 
[==============================] - 1s 4ms/step - loss: 0.0320 Epoch 50/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0315 Epoch 51/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0315 Epoch 52/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0310 Epoch 53/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0309 Epoch 54/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0332 Epoch 55/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0307 Epoch 56/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0310 Epoch 57/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0310 Epoch 58/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0312 Epoch 59/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0308 Epoch 60/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0310 Epoch 61/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0303 Epoch 62/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0317 Epoch 63/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0304 Epoch 64/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0310 Epoch 65/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0309 Epoch 66/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0311 Epoch 67/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0301 Epoch 68/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0304 Epoch 69/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0303 Epoch 70/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0301 Epoch 71/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0299 Epoch 72/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0301 Epoch 73/100 273/273 
[==============================] - 1s 4ms/step - loss: 0.0301 Epoch 74/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0299 Epoch 75/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0299 Epoch 76/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0301 Epoch 77/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0297 Epoch 78/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0294 Epoch 79/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0298 Epoch 80/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0296 Epoch 81/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0297 Epoch 82/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0292 Epoch 83/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0294 Epoch 84/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0291 Epoch 85/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0291 Epoch 86/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0289 Epoch 87/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0293 Epoch 88/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0296 Epoch 89/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0291 Epoch 90/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0290 Epoch 91/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0285 Epoch 92/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0290 Epoch 93/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0289 Epoch 94/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0286 Epoch 95/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0286 Epoch 96/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0284 Epoch 97/100 273/273 
[==============================] - 1s 4ms/step - loss: 0.0280 Epoch 98/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0286 Epoch 99/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0286 Epoch 100/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0282
#Generate predictions (still on the scaled range)
scaled_preds_test = model.predict(X_test)
scaled_preds_train = model.predict(X_train)
#Report R2/MAE/MSE/RMSE/MAPE for test and training data via the shared helper;
#NOTE(review): the helper presumably inverse-transforms the scaled predictions
#before comparing against the unscaled ground truth — see functions.py.
functions.custom_metrics_lstm(y_test_true, scaled_preds_test, y_train_true, scaled_preds_train)
Testdaten Trainingsdaten
R2 0.95 0.97
MAE 4582.0 3202.0
MSE 32860335.0 18666035.0
RMSE 5732.0 4320.0
MAPE 2.74 % 1.93 %
Da sich das Modell bei 32 Neuronen etwas unteranpasst, wird versucht, das Modell durch mehr Neuronen in der Dense-Schicht (64) zu erweitern.
#Hyperparameters
epochs = 100
batch_size = 8
window_size = 14

def model_creation():
    """Build and compile the LSTM model with a widened Dense hidden layer.

    Architecture: one LSTM layer (32 units, tanh), a Dense hidden layer
    (64 units, tanh) and a single linear output neuron, optimized with
    Adam (lr=0.0005) on the custom RMSE loss.
    """
    net = Sequential()
    net.add(InputLayer(input_shape=(window_size, 4)))
    net.add(LSTM(units=32, return_sequences=False, activation="tanh"))
    net.add(Dense(units=64, activation="tanh"))
    net.add(Dense(units=1))
    optimizer = Adam(learning_rate=0.0005)
    net.compile(loss=root_mean_squared_error, optimizer=optimizer)
    return net
#Load unscaled data for evaluation against ground truth
df_unscaled = pd.read_csv("../3-Data Preparation/data.csv", index_col=0, parse_dates=True)
df_unscaled.index.freq = "D"
#Ground-truth targets on the original scale (test: year 2021; train: rest minus first window)
y_test_true = df_unscaled["verbrauch"]["2021-01-01":]
y_train_true = df_unscaled["verbrauch"][window_size:2192]

#Load scaled data for modelling
df_scaled = pd.read_csv("../3-Data Preparation/data_scaled.csv", index_col=0, parse_dates=True)
df_scaled.index.freq = "D"

#Split into X (features) and y (target).
#.copy() gives X its own buffer so the shifted columns below are written into an
#independent frame rather than a slice of df_scaled (avoids SettingWithCopyWarning,
#which the file-wide warning filter would otherwise silently hide).
X = df_scaled[["verbrauch", "arbeitstag", "temperatur", "tagesstunden"]].copy()
#The exogenous features are shifted one step forward so day t's consumption is
#paired with day t+1's calendar/weather values; hence 2015-01-01 drops out.
exog_cols = ["arbeitstag", "temperatur", "tagesstunden"]
X[exog_cols] = X[exog_cols].shift(-1)
X = X[:2556]
y = df_scaled["verbrauch"]
#Restructure the data into sliding windows
def restructure_data(px, py, window_size):
    """Cut px into overlapping windows of length window_size and pair each
    window with the target value that immediately follows it.

    NOTE(review): the range runs up to len(px) - window_size inclusive, so the
    last target read is py[len(px)] — py must be at least one element longer
    than px (true here, since X drops its final row while y keeps it).
    """
    starts = range(len(px) - window_size + 1)
    windows = [px[s:s + window_size] for s in starts]
    targets = [py[s + window_size] for s in starts]
    return np.array(windows), np.array(targets)
X_windows, y_windows = restructure_data(X, y, window_size)
#Split into training, validation and test data.
#2557 daily targets in total; the last 365 days (2021) form the test set and the
#first window_size targets have no complete preceding window.
split_by = 2557 - 365 - window_size
X_train = X_windows[:split_by]
y_train = y_windows[:split_by]
X_test = X_windows[split_by:]
y_test = y_windows[split_by:]
#Create the cross-validation splitter (5 shuffled folds, fixed seed for reproducibility)
kfold = KFold(n_splits=5, shuffle=True, random_state=0)
#DataFrames collecting the per-epoch history and the per-split error metrics
df_history = pd.DataFrame()
df_history.index.name = "Epoch"
df_metrics = pd.DataFrame(columns=["Testdaten", "Trainingsdaten", "Validierungsdaten"])
df_metrics.index.name = "Split"
iteration = 1
#Scaler fitted on the target during data preparation; used to invert the scaling
scaler_target = joblib.load("../3-Data Preparation/scaler_endog.save")
#Cross-validation: train one freshly initialized model per fold, record its
#loss history and its MAPE on test/train/validation data.
for train_index, validation_index in kfold.split(X_train, y_train):
    print(iteration, "-", end="\t")
    #Build the model
    model = model_creation()
    #Train the model on this fold's training portion, validating on the held-out fold
    history = model.fit(x=X_train[train_index], y=y_train[train_index], epochs=epochs, batch_size=batch_size, validation_data=(X_train[validation_index], y_train[validation_index]), verbose=0)
    #Store per-epoch losses, mapped back to the original consumption scale.
    #NOTE(review): inverse-transforming an RMSE through the MinMax scaler also adds
    #the scaler's offset; 102469 is presumably that minimum being subtracted back
    #out — confirm against scaler_endog.save.
    df_history[str(iteration) + "_train_loss"] = pd.DataFrame(data=(scaler_target.inverse_transform(np.array(history.history["loss"]).reshape(-1, 1)) - 102469)).squeeze()
    df_history[str(iteration) + "_validation_loss"] = pd.DataFrame(data=(scaler_target.inverse_transform(np.array(history.history["val_loss"]).reshape(-1, 1)) - 102469)).squeeze()
    #Predictions for test, training and validation data, inverse-scaled
    preds_test = scaler_target.inverse_transform(model.predict(X_test).reshape(-1, 1))
    preds_train = scaler_target.inverse_transform(model.predict(X_train[train_index]).reshape(-1, 1))
    preds_validation = scaler_target.inverse_transform(model.predict(X_train[validation_index]).reshape(-1, 1))
    #MAPE in percent against the unscaled ground truth, rounded to 2 decimals
    df_metrics.loc[iteration] = [round(metrics.mean_absolute_percentage_error(y_test_true, preds_test) * 100, 2),
                                 round(metrics.mean_absolute_percentage_error(y_train_true[train_index], preds_train) * 100, 2),
                                 round(metrics.mean_absolute_percentage_error(y_train_true[validation_index], preds_validation) * 100, 2)]
    iteration = iteration + 1
#Average the loss histories and the metrics over the five cross-validation splits
for curve in ("train_loss", "validation_loss"):
    running = df_history["1_" + curve]
    for prefix in ("2_", "3_", "4_", "5_"):
        running = running + df_history[prefix + curve]
    df_history[curve] = running / 5
df_metrics.loc["Average"] = [round(df_metrics[col].mean(), 2)
                             for col in ("Testdaten", "Trainingsdaten", "Validierungsdaten")]
1 - 2 - 3 - 4 - 5 -
#Evaluate the training (metrics table and learning curve).
#NOTE(review): the third argument (25000) is presumably an axis/loss limit for
#the learning-curve plot — see functions.py to confirm.
functions.evaluate_training(df_metrics, df_history, 25000)
Testdaten Trainingsdaten Validierungsdaten Split 1 2.33 1.74 1.85 2 2.40 1.72 1.75 3 2.33 1.75 1.77 4 2.48 1.81 2.09 5 2.25 1.76 1.81 Average 2.36 1.76 1.85
Das Modell beginnt nun etwas später mit der Konvergenz, allerdings ist beim Training während der Kreuzvalidierungen eine leichte Verbesserung des MAPE auf Trainings- und Testdaten zu erkennen. Eine Überanpassung an die Trainingsdaten ist allerdings nicht erkennbar.
Das Modell verbessert sich gleichermaßen bei den Trainings- und Testdaten. Die Unteranpassung wurde durch zusätzliche Neuronen also zumindest teilweise behoben.
#Build the final model (trained on the full training set, no validation split)
model = model_creation()
#Train the final model.
#NOTE(review): use_multiprocessing only applies to keras.utils.Sequence inputs;
#with NumPy arrays it is ignored — confirm and consider removing.
history = model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size, use_multiprocessing=True)
Epoch 1/100 273/273 [==============================] - 2s 4ms/step - loss: 0.1574 Epoch 2/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0675 Epoch 3/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0584 Epoch 4/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0524 Epoch 5/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0472 Epoch 6/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0465 Epoch 7/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0465 Epoch 8/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0420 Epoch 9/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0410 Epoch 10/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0398 Epoch 11/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0401 Epoch 12/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0383 Epoch 13/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0384 Epoch 14/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0383 Epoch 15/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0380 Epoch 16/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0382 Epoch 17/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0361 Epoch 18/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0362 Epoch 19/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0366 Epoch 20/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0373 Epoch 21/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0356 Epoch 22/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0355 Epoch 23/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0348 Epoch 24/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0354 Epoch 25/100 
273/273 [==============================] - 1s 4ms/step - loss: 0.0346 Epoch 26/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0338 Epoch 27/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0348 Epoch 28/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0346 Epoch 29/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0345 Epoch 30/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0342 Epoch 31/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0340 Epoch 32/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0337 Epoch 33/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0330 Epoch 34/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0344 Epoch 35/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0333 Epoch 36/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0341 Epoch 37/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0330 Epoch 38/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0333 Epoch 39/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0324 Epoch 40/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0335 Epoch 41/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0326 Epoch 42/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0326 Epoch 43/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0320 Epoch 44/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0327 Epoch 45/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0321 Epoch 46/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0322 Epoch 47/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0324 Epoch 48/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0329 Epoch 49/100 273/273 
[==============================] - 1s 4ms/step - loss: 0.0316 Epoch 50/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0325 Epoch 51/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0320 Epoch 52/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0319 Epoch 53/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0322 Epoch 54/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0320 Epoch 55/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0316 Epoch 56/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0313 Epoch 57/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0318 Epoch 58/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0318 Epoch 59/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0321 Epoch 60/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0319 Epoch 61/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0313 Epoch 62/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0308 Epoch 63/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0312 Epoch 64/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0315 Epoch 65/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0319 Epoch 66/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0310 Epoch 67/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0308 Epoch 68/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0307 Epoch 69/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0305 Epoch 70/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0313 Epoch 71/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0308 Epoch 72/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0304 Epoch 73/100 273/273 
[==============================] - 1s 4ms/step - loss: 0.0302 Epoch 74/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0305 Epoch 75/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0304 Epoch 76/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0306 Epoch 77/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0301 Epoch 78/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0301 Epoch 79/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0296 Epoch 80/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0302 Epoch 81/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0299 Epoch 82/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0296 Epoch 83/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0300 Epoch 84/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0296 Epoch 85/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0296 Epoch 86/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0298 Epoch 87/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0296 Epoch 88/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0294 Epoch 89/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0298 Epoch 90/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0296 Epoch 91/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0293 Epoch 92/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0289 Epoch 93/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0291 Epoch 94/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0288 Epoch 95/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0296 Epoch 96/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0292 Epoch 97/100 273/273 
[==============================] - 1s 4ms/step - loss: 0.0297 Epoch 98/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0289 Epoch 99/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0292 Epoch 100/100 273/273 [==============================] - 1s 4ms/step - loss: 0.0287
#Generate predictions (still on the scaled range)
scaled_preds_test = model.predict(X_test)
scaled_preds_train = model.predict(X_train)
#Report R2/MAE/MSE/RMSE/MAPE for test and training data via the shared helper;
#NOTE(review): the helper presumably inverse-transforms the scaled predictions
#before comparing against the unscaled ground truth — see functions.py.
functions.custom_metrics_lstm(y_test_true, scaled_preds_test, y_train_true, scaled_preds_train)
Testdaten Trainingsdaten
R2 0.96 0.98
MAE 4160.0 2795.0
MSE 30600502.0 16411776.0
RMSE 5532.0 4051.0
MAPE 2.55 % 1.76 %
Da sich die Unteranpassung durch mehr Neuronen teils beheben lässt, werden die Schichten im nächsten Versuch auf 64 Neuronen bei LSTM und 128 bei Dense erweitert.
#Hyperparameters
epochs = 100
batch_size = 8
window_size = 14

def model_creation():
    """Build and compile the enlarged LSTM model.

    Architecture: one LSTM layer (64 units, tanh), a Dense hidden layer
    (128 units, tanh) and a single linear output neuron, optimized with
    Adam (lr=0.0005) on the custom RMSE loss.
    """
    net = Sequential()
    net.add(InputLayer(input_shape=(window_size, 4)))
    net.add(LSTM(units=64, return_sequences=False, activation="tanh"))
    net.add(Dense(units=128, activation="tanh"))
    net.add(Dense(units=1))
    optimizer = Adam(learning_rate=0.0005)
    net.compile(loss=root_mean_squared_error, optimizer=optimizer)
    return net
#Load unscaled data for evaluation against ground truth
df_unscaled = pd.read_csv("../3-Data Preparation/data.csv", index_col=0, parse_dates=True)
df_unscaled.index.freq = "D"
#Ground-truth targets on the original scale (test: year 2021; train: rest minus first window)
y_test_true = df_unscaled["verbrauch"]["2021-01-01":]
y_train_true = df_unscaled["verbrauch"][window_size:2192]

#Load scaled data for modelling
df_scaled = pd.read_csv("../3-Data Preparation/data_scaled.csv", index_col=0, parse_dates=True)
df_scaled.index.freq = "D"

#Split into X (features) and y (target).
#.copy() gives X its own buffer so the shifted columns below are written into an
#independent frame rather than a slice of df_scaled (avoids SettingWithCopyWarning,
#which the file-wide warning filter would otherwise silently hide).
X = df_scaled[["verbrauch", "arbeitstag", "temperatur", "tagesstunden"]].copy()
#The exogenous features are shifted one step forward so day t's consumption is
#paired with day t+1's calendar/weather values; hence 2015-01-01 drops out.
exog_cols = ["arbeitstag", "temperatur", "tagesstunden"]
X[exog_cols] = X[exog_cols].shift(-1)
X = X[:2556]
y = df_scaled["verbrauch"]
#Restructure the data into sliding windows
def restructure_data(px, py, window_size):
    """Cut px into overlapping windows of length window_size and pair each
    window with the target value that immediately follows it.

    NOTE(review): the range runs up to len(px) - window_size inclusive, so the
    last target read is py[len(px)] — py must be at least one element longer
    than px (true here, since X drops its final row while y keeps it).
    """
    starts = range(len(px) - window_size + 1)
    windows = [px[s:s + window_size] for s in starts]
    targets = [py[s + window_size] for s in starts]
    return np.array(windows), np.array(targets)
X_windows, y_windows = restructure_data(X, y, window_size)
#Split into training, validation and test data.
#2557 daily targets in total; the last 365 days (2021) form the test set and the
#first window_size targets have no complete preceding window.
split_by = 2557 - 365 - window_size
X_train = X_windows[:split_by]
y_train = y_windows[:split_by]
X_test = X_windows[split_by:]
y_test = y_windows[split_by:]
#Create the cross-validation splitter (5 shuffled folds, fixed seed for reproducibility)
kfold = KFold(n_splits=5, shuffle=True, random_state=0)
#DataFrames collecting the per-epoch history and the per-split error metrics
df_history = pd.DataFrame()
df_history.index.name = "Epoch"
df_metrics = pd.DataFrame(columns=["Testdaten", "Trainingsdaten", "Validierungsdaten"])
df_metrics.index.name = "Split"
iteration = 1
#Scaler fitted on the target during data preparation; used to invert the scaling
scaler_target = joblib.load("../3-Data Preparation/scaler_endog.save")
#Cross-validation: train one freshly initialized model per fold, record its
#loss history and its MAPE on test/train/validation data.
for train_index, validation_index in kfold.split(X_train, y_train):
    print(iteration, "-", end="\t")
    #Build the model
    model = model_creation()
    #Train the model on this fold's training portion, validating on the held-out fold
    history = model.fit(x=X_train[train_index], y=y_train[train_index], epochs=epochs, batch_size=batch_size, validation_data=(X_train[validation_index], y_train[validation_index]), verbose=0)
    #Store per-epoch losses, mapped back to the original consumption scale.
    #NOTE(review): inverse-transforming an RMSE through the MinMax scaler also adds
    #the scaler's offset; 102469 is presumably that minimum being subtracted back
    #out — confirm against scaler_endog.save.
    df_history[str(iteration) + "_train_loss"] = pd.DataFrame(data=(scaler_target.inverse_transform(np.array(history.history["loss"]).reshape(-1, 1)) - 102469)).squeeze()
    df_history[str(iteration) + "_validation_loss"] = pd.DataFrame(data=(scaler_target.inverse_transform(np.array(history.history["val_loss"]).reshape(-1, 1)) - 102469)).squeeze()
    #Predictions for test, training and validation data, inverse-scaled
    preds_test = scaler_target.inverse_transform(model.predict(X_test).reshape(-1, 1))
    preds_train = scaler_target.inverse_transform(model.predict(X_train[train_index]).reshape(-1, 1))
    preds_validation = scaler_target.inverse_transform(model.predict(X_train[validation_index]).reshape(-1, 1))
    #MAPE in percent against the unscaled ground truth, rounded to 2 decimals
    df_metrics.loc[iteration] = [round(metrics.mean_absolute_percentage_error(y_test_true, preds_test) * 100, 2),
                                 round(metrics.mean_absolute_percentage_error(y_train_true[train_index], preds_train) * 100, 2),
                                 round(metrics.mean_absolute_percentage_error(y_train_true[validation_index], preds_validation) * 100, 2)]
    iteration = iteration + 1
# Average the loss curves and metrics over all folds.
# Derive the fold count from the splitter instead of hard-coding columns
# 1..5, so changing n_splits cannot silently break the averaging.
n_folds = kfold.get_n_splits()
fold_ids = range(1, n_folds + 1)
df_history["train_loss"] = sum(df_history[str(i) + "_train_loss"] for i in fold_ids) / n_folds
df_history["validation_loss"] = sum(df_history[str(i) + "_validation_loss"] for i in fold_ids) / n_folds
# Append the per-column averages, rounded like the per-fold entries
df_metrics.loc["Average"] = [round(df_metrics[col].mean(), 2)
                             for col in ["Testdaten", "Trainingsdaten", "Validierungsdaten"]]
1 - 2 - 3 - 4 - 5 -
# Evaluate the training run: metric table and averaged learning curve
# (20000 is presumably a plot-axis limit -- confirm against functions.py)
functions.evaluate_training(df_metrics, df_history, 20000)
Testdaten Trainingsdaten Validierungsdaten Split 1 2.53 1.61 1.84 2 2.34 1.66 1.78 3 2.39 1.71 1.82 4 2.43 1.63 1.92 5 2.39 1.64 1.66 Average 2.42 1.65 1.80
Die Lernkurve zeigt, dass ab der etwa 80. Epoche die Überanpassung beginnt. Die Konvergenz des Modells ist aber noch nicht erreicht. Es sind daher möglicherweise weitere Epochen in Verbindung mit Dropouts oder Regularisierung notwendig.
Anhand der Metriken lässt sich erkennen, dass das Netz trotz der zusätzlichen Neuronen zunächst sehr ähnlich abschneidet wie das Modell mit 32 und 64 Neuronen. Die erweiterten Schichten bringen also nur stark eingeschränkten Mehrwert.
# Create the final model
model = model_creation()
# Train the final model on the full training set.
# Note: the original call passed use_multiprocessing=True, but that flag only
# applies to generator/keras.utils.Sequence inputs and is ignored for NumPy
# arrays, so it is dropped here (behavior is unchanged).
history = model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size)
Epoch 1/100 273/273 [==============================] - 3s 5ms/step - loss: 0.1092 Epoch 2/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0592 Epoch 3/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0532 Epoch 4/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0504 Epoch 5/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0464 Epoch 6/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0436 Epoch 7/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0423 Epoch 8/100 273/273 [==============================] - 2s 5ms/step - loss: 0.0417 Epoch 9/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0390 Epoch 10/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0384 Epoch 11/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0373 Epoch 12/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0378 Epoch 13/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0368 Epoch 14/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0368 Epoch 15/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0354 Epoch 16/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0357 Epoch 17/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0346 Epoch 18/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0343 Epoch 19/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0347 Epoch 20/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0340 Epoch 21/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0345 Epoch 22/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0336 Epoch 23/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0333 Epoch 24/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0344 Epoch 25/100 
273/273 [==============================] - 2s 6ms/step - loss: 0.0336 Epoch 26/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0339 Epoch 27/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0326 Epoch 28/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0333 Epoch 29/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0328 Epoch 30/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0319 Epoch 31/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0332 Epoch 32/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0330 Epoch 33/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0325 Epoch 34/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0325 Epoch 35/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0324 Epoch 36/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0321 Epoch 37/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0319 Epoch 38/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0321 Epoch 39/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0321 Epoch 40/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0315 Epoch 41/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0321 Epoch 42/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0318 Epoch 43/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0306 Epoch 44/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0308 Epoch 45/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0313 Epoch 46/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0311 Epoch 47/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0306 Epoch 48/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0310 Epoch 49/100 273/273 
[==============================] - 2s 6ms/step - loss: 0.0315 Epoch 50/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0303 Epoch 51/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0305 Epoch 52/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0301 Epoch 53/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0307 Epoch 54/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0303A: 0s - loss: 0.0 Epoch 55/100 273/273 [==============================] - 2s 5ms/step - loss: 0.0298 Epoch 56/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0296 Epoch 57/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0299 Epoch 58/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0296 Epoch 59/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0297A: 0s Epoch 60/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0296 Epoch 61/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0289 Epoch 62/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0292 Epoch 63/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0295 Epoch 64/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0289 Epoch 65/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0286 Epoch 66/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0287 Epoch 67/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0287 Epoch 68/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0285 Epoch 69/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0288 Epoch 70/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0287 Epoch 71/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0285 Epoch 72/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0277 Epoch 
73/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0284 Epoch 74/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0278 Epoch 75/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0274 Epoch 76/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0280 Epoch 77/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0275 Epoch 78/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0274 Epoch 79/100 273/273 [==============================] - 2s 6ms/step - loss: 0.0277 Epoch 80/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0266 Epoch 81/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0272 Epoch 82/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0274 Epoch 83/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0267 Epoch 84/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0267 Epoch 85/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0267 Epoch 86/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0266 Epoch 87/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0263 Epoch 88/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0264 Epoch 89/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0269 Epoch 90/100 273/273 [==============================] - 2s 5ms/step - loss: 0.0261 Epoch 91/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0264A: 0s - Epoch 92/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0268 Epoch 93/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0266 Epoch 94/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0263 Epoch 95/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0264 Epoch 96/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0267 Epoch 
97/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0261 Epoch 98/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0257A: 0s - loss: 0.02 Epoch 99/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0258 Epoch 100/100 273/273 [==============================] - 1s 5ms/step - loss: 0.0257
# Generate (still scaled) predictions for the test and training windows
scaled_preds_test = model.predict(X_test)
scaled_preds_train = model.predict(X_train)
# Project helper: rescales the predictions and reports R2/MAE/MSE/RMSE/MAPE
functions.custom_metrics_lstm(y_test_true, scaled_preds_test, y_train_true, scaled_preds_train)
Testdaten Trainingsdaten
R2 0.96 0.98
MAE 4092.0 2530.0
MSE 30016600.0 12901419.0
RMSE 5479.0 3592.0
MAPE 2.51 % 1.57 %
Da sich LSTM-DENSE-2 und LSTM-DENSE-3 sehr ähnlich verhalten, wird von einer Erweiterung der Neuronen abgesehen. Stattdessen wird ein Modell wie bei LSTM-DENSE-2 trainiert, jedoch mit mehr Epochen. Es soll dabei überprüft werden, ob ein längeres Training mit gegebenenfalls Dropouts oder Regularisierung zu besseren Ergebnissen führt.
# Hyperparameters (same architecture as before, now trained for 150 epochs)
epochs = 150
batch_size = 8
window_size = 14
def model_creation():
    """Build and compile the forecasting network.

    Architecture: an LSTM layer (32 units, tanh) over windows of
    ``window_size`` days with 4 features each, followed by a 64-unit tanh
    Dense layer and a single linear output neuron. Compiled with Adam
    (learning rate 0.0005) and the custom RMSE loss.
    """
    model = Sequential([
        InputLayer(input_shape=(window_size, 4)),
        LSTM(32, activation="tanh", return_sequences=False),
        Dense(64, activation="tanh"),
        Dense(1),
    ])
    model.compile(loss=root_mean_squared_error, optimizer=Adam(learning_rate=0.0005))
    return model
# Load the unscaled data for the evaluation/analysis steps
df_unscaled = pd.read_csv("../3-Data Preparation/data.csv", index_col=0, parse_dates=True)
df_unscaled.index.freq = "D"
# Ground-truth consumption for the test year ...
y_test_true = df_unscaled["verbrauch"]["2021-01-01":]
# ... and for the windowed training rows (rows window_size..2191;
# presumably the years 2015-2020 -- TODO confirm)
y_train_true = df_unscaled["verbrauch"][window_size:2192]
# Load the scaled data used for modelling
df_scaled = pd.read_csv("../3-Data Preparation/data_scaled.csv", index_col=0, parse_dates=True)
df_scaled.index.freq = "D"
# Split features (X) and target (y).
# Take an explicit copy: without it, the column assignments below write into a
# slice of df_scaled and trigger pandas' chained-assignment warning (and can
# silently do nothing under copy-on-write semantics).
X = df_scaled[["verbrauch","arbeitstag","temperatur","tagesstunden"]].copy()
# The exogenous features are shifted one step back so that day t's consumption
# is paired with day t+1's calendar/weather values; the first day (2015-01-01)
# thereby drops out of the windowed data.
exog_cols = ["arbeitstag", "temperatur", "tagesstunden"]
X[exog_cols] = X[exog_cols].shift(-1)
# Keep the first 2556 rows (the shift leaves NaNs in the final row)
X = X[:2556]
y = df_scaled["verbrauch"]
# Split the data into sliding time windows
def restructure_data(px, py, window_size):
    """Turn the feature series into overlapping windows plus targets.

    Each window covers ``window_size`` consecutive rows of ``px``; its target
    is the ``py`` entry immediately after the window, so ``py`` must reach at
    least one element past the end of ``px``. Both results are returned as
    NumPy arrays.
    """
    starts = range(len(px) - window_size + 1)
    windows = [px[s:s + window_size] for s in starts]
    targets = [py[s + window_size] for s in starts]
    return np.array(windows), np.array(targets)
# Restructure the series into (window, target) pairs for the LSTM
X_windows, y_windows = restructure_data(X, y, window_size)
# Split into training and test data; validation folds are carved out of the
# training part by the cross-validation below.
# 2557 total days minus the 365-day test year minus the first window
# (presumably the data covers 2015-01-01..2021-12-31 -- TODO confirm)
split_by = 2557 - 365 - window_size
X_train = X_windows[:split_by]
y_train = y_windows[:split_by]
X_test = X_windows[split_by:]
y_test = y_windows[split_by:]
# Create the 5-fold cross-validation splitter (shuffled with a fixed seed
# so the folds are reproducible)
kfold = KFold(n_splits=5, shuffle=True, random_state=0)
# DataFrames collecting the per-fold learning history and error metrics
df_history = pd.DataFrame()
df_history.index.name = "Epoch"
df_metrics = pd.DataFrame(columns=["Testdaten", "Trainingsdaten", "Validierungsdaten"])
df_metrics.index.name = "Split"
# Running fold counter, used as row/column label in the frames above
iteration = 1
# Scaler fitted on the target during data preparation; maps scaled values
# back to the original consumption unit
scaler_target = joblib.load("../3-Data Preparation/scaler_endog.save")
# Cross-validation: train a fresh model per fold and record its learning
# history and MAPE scores for test / training / validation data.
for train_idx, val_idx in kfold.split(X_train, y_train):
    print(iteration, "-", end="\t")
    # Fresh, untrained model for this fold
    fold_model = model_creation()
    # Train on the fold's training part, validate on the held-out part
    fold_history = fold_model.fit(
        x=X_train[train_idx],
        y=y_train[train_idx],
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(X_train[val_idx], y_train[val_idx]),
        verbose=0,
    )
    # Store the loss curves mapped back to the original unit (the constant
    # 102469 is presumably the scaler's offset so the inverse-transformed
    # RMSE reads as an absolute error -- TODO confirm)
    for hist_key, col_suffix in (("loss", "_train_loss"), ("val_loss", "_validation_loss")):
        curve = np.array(fold_history.history[hist_key]).reshape(-1, 1)
        df_history[str(iteration) + col_suffix] = pd.DataFrame(
            data=(scaler_target.inverse_transform(curve) - 102469)
        ).squeeze()
    # Unscaled predictions for test, training and validation data
    preds_test = scaler_target.inverse_transform(fold_model.predict(X_test).reshape(-1, 1))
    preds_train = scaler_target.inverse_transform(fold_model.predict(X_train[train_idx]).reshape(-1, 1))
    preds_val = scaler_target.inverse_transform(fold_model.predict(X_train[val_idx]).reshape(-1, 1))
    # MAPE (in percent, rounded) per data set for this fold
    df_metrics.loc[iteration] = [
        round(metrics.mean_absolute_percentage_error(y_test_true, preds_test) * 100, 2),
        round(metrics.mean_absolute_percentage_error(y_train_true[train_idx], preds_train) * 100, 2),
        round(metrics.mean_absolute_percentage_error(y_train_true[val_idx], preds_val) * 100, 2),
    ]
    iteration = iteration + 1
# Average the loss curves and metrics over all folds.
# Derive the fold count from the splitter instead of hard-coding columns
# 1..5, so changing n_splits cannot silently break the averaging.
n_folds = kfold.get_n_splits()
fold_ids = range(1, n_folds + 1)
df_history["train_loss"] = sum(df_history[str(i) + "_train_loss"] for i in fold_ids) / n_folds
df_history["validation_loss"] = sum(df_history[str(i) + "_validation_loss"] for i in fold_ids) / n_folds
# Append the per-column averages, rounded like the per-fold entries
df_metrics.loc["Average"] = [round(df_metrics[col].mean(), 2)
                             for col in ["Testdaten", "Trainingsdaten", "Validierungsdaten"]]
1 - 2 - 3 - 4 - 5 -
# Evaluate the training run: metric table and averaged learning curve
# (20000 is presumably a plot-axis limit -- confirm against functions.py)
functions.evaluate_training(df_metrics, df_history, 20000)
Testdaten Trainingsdaten Validierungsdaten Split 1 2.47 1.61 1.91 2 2.77 2.05 2.18 3 2.86 1.72 1.77 4 2.56 1.45 1.74 5 2.67 1.52 1.61 Average 2.67 1.67 1.84
Durch weitere Epochen scheint zunächst keine große Verbesserung zu entstehen. Der MAPE verhält sich beim Training ähnlich.
# Create the final model
model = model_creation()
# Train the final model on the full training set.
# Note: the original call passed use_multiprocessing=True, but that flag only
# applies to generator/keras.utils.Sequence inputs and is ignored for NumPy
# arrays, so it is dropped here (behavior is unchanged).
history = model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size)
Epoch 1/150 273/273 [==============================] - 3s 4ms/step - loss: 0.1490 Epoch 2/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0614 Epoch 3/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0526 Epoch 4/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0472 Epoch 5/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0453 Epoch 6/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0428 Epoch 7/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0414 Epoch 8/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0396 Epoch 9/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0399 Epoch 10/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0386 Epoch 11/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0381 Epoch 12/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0376 Epoch 13/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0375 Epoch 14/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0387 Epoch 15/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0363 Epoch 16/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0360 Epoch 17/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0363 Epoch 18/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0356 Epoch 19/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0356 Epoch 20/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0349 Epoch 21/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0355 Epoch 22/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0334 Epoch 23/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0347 Epoch 24/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0342 Epoch 25/150 
273/273 [==============================] - 1s 4ms/step - loss: 0.0349 Epoch 26/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0330 Epoch 27/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0333 Epoch 28/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0331 Epoch 29/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0343 Epoch 30/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0326 Epoch 31/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0335 Epoch 32/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0327 Epoch 33/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0329 Epoch 34/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0325 Epoch 35/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0332 Epoch 36/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0322 Epoch 37/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0326 Epoch 38/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0330 Epoch 39/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0333 Epoch 40/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0320 Epoch 41/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0324 Epoch 42/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0321 Epoch 43/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0321 Epoch 44/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0320 Epoch 45/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0320 Epoch 46/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0319 Epoch 47/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0314 Epoch 48/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0323 Epoch 49/150 273/273 
[==============================] - 1s 4ms/step - loss: 0.0318 Epoch 50/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0320 Epoch 51/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0323 Epoch 52/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0312 Epoch 53/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0315 Epoch 54/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0313 Epoch 55/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0316 Epoch 56/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0313 Epoch 57/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0309 Epoch 58/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0309 Epoch 59/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0309 Epoch 60/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0309 Epoch 61/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0309 Epoch 62/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0306 Epoch 63/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0307 Epoch 64/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0311 Epoch 65/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0303 Epoch 66/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0309 Epoch 67/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0300 Epoch 68/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0301 Epoch 69/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0306 Epoch 70/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0304 Epoch 71/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0301 Epoch 72/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0304 Epoch 73/150 273/273 
[==============================] - 1s 4ms/step - loss: 0.0295 Epoch 74/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0296 Epoch 75/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0297 Epoch 76/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0301 Epoch 77/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0296 Epoch 78/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0295 Epoch 79/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0292 Epoch 80/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0299 Epoch 81/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0300 Epoch 82/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0294 Epoch 83/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0295 Epoch 84/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0291 Epoch 85/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0287 Epoch 86/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0289 Epoch 87/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0290 Epoch 88/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0286 Epoch 89/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0287 Epoch 90/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0290 Epoch 91/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0290 Epoch 92/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0286 Epoch 93/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0289 Epoch 94/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0284 Epoch 95/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0287 Epoch 96/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0280 Epoch 97/150 273/273 
[==============================] - 1s 4ms/step - loss: 0.0282 Epoch 98/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0288 Epoch 99/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0290 Epoch 100/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0284 Epoch 101/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0282 Epoch 102/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0291 Epoch 103/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0284 Epoch 104/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0281 Epoch 105/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0273 Epoch 106/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0276 Epoch 107/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0280 Epoch 108/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0275 Epoch 109/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0281 Epoch 110/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0276 Epoch 111/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0280 Epoch 112/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0275 Epoch 113/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0274 Epoch 114/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0273 Epoch 115/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0274 Epoch 116/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0273 Epoch 117/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0269 Epoch 118/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0272 Epoch 119/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0268 Epoch 120/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0272 Epoch 
121/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0270 Epoch 122/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0268 Epoch 123/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0274 Epoch 124/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0269 Epoch 125/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0265 Epoch 126/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0264 Epoch 127/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0264 Epoch 128/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0263 Epoch 129/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0264 Epoch 130/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0265 Epoch 131/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0273 Epoch 132/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0265 Epoch 133/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0258 Epoch 134/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0258 Epoch 135/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0260 Epoch 136/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0259 Epoch 137/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0260 Epoch 138/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0257 Epoch 139/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0262 Epoch 140/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0262 Epoch 141/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0261 Epoch 142/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0259 Epoch 143/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0260 Epoch 144/150 273/273 [==============================] - 1s 4ms/step - 
loss: 0.0261 Epoch 145/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0257 Epoch 146/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0257 Epoch 147/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0258 Epoch 148/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0253 Epoch 149/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0257 Epoch 150/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0253
# Generate (still scaled) predictions for the test and training windows
scaled_preds_test = model.predict(X_test)
scaled_preds_train = model.predict(X_train)
# Project helper: rescales the predictions and reports R2/MAE/MSE/RMSE/MAPE
functions.custom_metrics_lstm(y_test_true, scaled_preds_test, y_train_true, scaled_preds_train)
Testdaten Trainingsdaten
R2 0.96 0.98
MAE 3938.0 2472.0
MSE 27542712.0 12300389.0
RMSE 5248.0 3507.0
MAPE 2.41 % 1.53 %
Es soll erneut geprüft werden, ob eine niedrigere Lernrate Verbesserungen herbeiführt. Die Lernrate wird also auf 0,0001 heruntergesetzt.
# Hyperparameters (unchanged; only the learning rate in the model below
# is lowered to 0.0001)
epochs = 150
batch_size = 8
window_size = 14
def model_creation():
    """Build and compile the forecasting network.

    Architecture: an LSTM layer (32 units, tanh) over windows of
    ``window_size`` days with 4 features each, followed by a 64-unit tanh
    Dense layer and a single linear output neuron. Compiled with Adam and
    the custom RMSE loss; this variant uses the lowered learning rate 0.0001.
    """
    model = Sequential([
        InputLayer(input_shape=(window_size, 4)),
        LSTM(32, activation="tanh", return_sequences=False),
        Dense(64, activation="tanh"),
        Dense(1),
    ])
    model.compile(loss=root_mean_squared_error, optimizer=Adam(learning_rate=0.0001))
    return model
# Load the unscaled data for the evaluation/analysis steps
df_unscaled = pd.read_csv("../3-Data Preparation/data.csv", index_col=0, parse_dates=True)
df_unscaled.index.freq = "D"
# Ground-truth consumption for the test year ...
y_test_true = df_unscaled["verbrauch"]["2021-01-01":]
# ... and for the windowed training rows (rows window_size..2191;
# presumably the years 2015-2020 -- TODO confirm)
y_train_true = df_unscaled["verbrauch"][window_size:2192]
# Load the scaled data used for modelling
df_scaled = pd.read_csv("../3-Data Preparation/data_scaled.csv", index_col=0, parse_dates=True)
df_scaled.index.freq = "D"
# Split features (X) and target (y).
# Take an explicit copy: without it, the column assignments below write into a
# slice of df_scaled and trigger pandas' chained-assignment warning (and can
# silently do nothing under copy-on-write semantics).
X = df_scaled[["verbrauch","arbeitstag","temperatur","tagesstunden"]].copy()
# The exogenous features are shifted one step back so that day t's consumption
# is paired with day t+1's calendar/weather values; the first day (2015-01-01)
# thereby drops out of the windowed data.
exog_cols = ["arbeitstag", "temperatur", "tagesstunden"]
X[exog_cols] = X[exog_cols].shift(-1)
# Keep the first 2556 rows (the shift leaves NaNs in the final row)
X = X[:2556]
y = df_scaled["verbrauch"]
# Split the data into sliding time windows
def restructure_data(px, py, window_size):
    """Turn the feature series into overlapping windows plus targets.

    Each window covers ``window_size`` consecutive rows of ``px``; its target
    is the ``py`` entry immediately after the window, so ``py`` must reach at
    least one element past the end of ``px``. Both results are returned as
    NumPy arrays.
    """
    starts = range(len(px) - window_size + 1)
    windows = [px[s:s + window_size] for s in starts]
    targets = [py[s + window_size] for s in starts]
    return np.array(windows), np.array(targets)
# Restructure the series into (window, target) pairs for the LSTM
X_windows, y_windows = restructure_data(X, y, window_size)
# Split into training and test data; validation folds are carved out of the
# training part by the cross-validation below.
# 2557 total days minus the 365-day test year minus the first window
# (presumably the data covers 2015-01-01..2021-12-31 -- TODO confirm)
split_by = 2557 - 365 - window_size
X_train = X_windows[:split_by]
y_train = y_windows[:split_by]
X_test = X_windows[split_by:]
y_test = y_windows[split_by:]
# Create the 5-fold cross-validation splitter (shuffled with a fixed seed
# so the folds are reproducible)
kfold = KFold(n_splits=5, shuffle=True, random_state=0)
# DataFrames collecting the per-fold learning history and error metrics
df_history = pd.DataFrame()
df_history.index.name = "Epoch"
df_metrics = pd.DataFrame(columns=["Testdaten", "Trainingsdaten", "Validierungsdaten"])
df_metrics.index.name = "Split"
# Running fold counter, used as row/column label in the frames above
iteration = 1
# Scaler fitted on the target during data preparation; maps scaled values
# back to the original consumption unit
scaler_target = joblib.load("../3-Data Preparation/scaler_endog.save")
# Cross-validation: train a fresh model per fold and record its learning
# history and MAPE scores for test / training / validation data.
for train_idx, val_idx in kfold.split(X_train, y_train):
    print(iteration, "-", end="\t")
    # Fresh, untrained model for this fold
    fold_model = model_creation()
    # Train on the fold's training part, validate on the held-out part
    fold_history = fold_model.fit(
        x=X_train[train_idx],
        y=y_train[train_idx],
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(X_train[val_idx], y_train[val_idx]),
        verbose=0,
    )
    # Store the loss curves mapped back to the original unit (the constant
    # 102469 is presumably the scaler's offset so the inverse-transformed
    # RMSE reads as an absolute error -- TODO confirm)
    for hist_key, col_suffix in (("loss", "_train_loss"), ("val_loss", "_validation_loss")):
        curve = np.array(fold_history.history[hist_key]).reshape(-1, 1)
        df_history[str(iteration) + col_suffix] = pd.DataFrame(
            data=(scaler_target.inverse_transform(curve) - 102469)
        ).squeeze()
    # Unscaled predictions for test, training and validation data
    preds_test = scaler_target.inverse_transform(fold_model.predict(X_test).reshape(-1, 1))
    preds_train = scaler_target.inverse_transform(fold_model.predict(X_train[train_idx]).reshape(-1, 1))
    preds_val = scaler_target.inverse_transform(fold_model.predict(X_train[val_idx]).reshape(-1, 1))
    # MAPE (in percent, rounded) per data set for this fold
    df_metrics.loc[iteration] = [
        round(metrics.mean_absolute_percentage_error(y_test_true, preds_test) * 100, 2),
        round(metrics.mean_absolute_percentage_error(y_train_true[train_idx], preds_train) * 100, 2),
        round(metrics.mean_absolute_percentage_error(y_train_true[val_idx], preds_val) * 100, 2),
    ]
    iteration = iteration + 1
#Durchschnittswerte bei History und Metriken berechnen
# Average the per-split loss curves and metrics.
# Derived from the actual per-split columns instead of hard-coding splits
# 1..5, so the code keeps working if KFold's n_splits ever changes.
_train_cols = [c for c in df_history.columns if c.endswith("_train_loss")]
_validation_cols = [c for c in df_history.columns if c.endswith("_validation_loss")]
df_history["train_loss"] = df_history[_train_cols].mean(axis=1)
df_history["validation_loss"] = df_history[_validation_cols].mean(axis=1)
df_metrics.loc["Average"] = [round(df_metrics[col].mean(), 2) for col in df_metrics.columns]
1 - 2 - 3 - 4 - 5 -
# Evaluate the training: print the metric table and plot the learning curves.
# NOTE(review): 20000 is presumably an axis/scale limit for the loss plot —
# confirm against functions.evaluate_training.
functions.evaluate_training(df_metrics, df_history, 20000)
Testdaten Trainingsdaten Validierungsdaten Split 1 2.57 1.93 1.99 2 2.55 1.93 1.91 3 2.46 1.93 2.02 4 2.38 1.86 2.11 5 2.35 1.98 1.91 Average 2.46 1.93 1.99
Durch die niedrigere Lernrate werden die Testdaten gleich gut, die Trainingsdaten allerdings etwas schlechter abgebildet. Der Verlauf der Lernkurve kann nicht geändert werden.
# Build the final model and train it on the full training set.
model = model_creation()
# use_multiprocessing=True was dropped: it only affects Sequence/generator
# input (not NumPy arrays), so it was a no-op here, and the argument was
# removed entirely in Keras 3.
history = model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size)
Epoch 1/150 273/273 [==============================] - 2s 4ms/step - loss: 0.2233 Epoch 2/150 273/273 [==============================] - 1s 4ms/step - loss: 0.1498 Epoch 3/150 273/273 [==============================] - 1s 4ms/step - loss: 0.1203 Epoch 4/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0950 Epoch 5/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0764 Epoch 6/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0675 Epoch 7/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0633 Epoch 8/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0597 Epoch 9/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0579 Epoch 10/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0557 Epoch 11/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0549 Epoch 12/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0538 Epoch 13/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0521 Epoch 14/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0508 Epoch 15/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0506 Epoch 16/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0494 Epoch 17/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0485 Epoch 18/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0479 Epoch 19/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0475 Epoch 20/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0474 Epoch 21/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0461 Epoch 22/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0457 Epoch 23/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0449 Epoch 24/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0448 Epoch 25/150 
273/273 [==============================] - 1s 4ms/step - loss: 0.0441 Epoch 26/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0437 Epoch 27/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0439 Epoch 28/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0434 Epoch 29/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0431 Epoch 30/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0424 Epoch 31/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0424 Epoch 32/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0422 Epoch 33/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0414 Epoch 34/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0419 Epoch 35/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0410 Epoch 36/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0410 Epoch 37/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0403 Epoch 38/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0413 Epoch 39/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0401 Epoch 40/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0399 Epoch 41/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0394 Epoch 42/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0394 Epoch 43/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0393 Epoch 44/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0391 Epoch 45/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0389 Epoch 46/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0387 Epoch 47/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0389 Epoch 48/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0385 Epoch 49/150 273/273 
[==============================] - 1s 4ms/step - loss: 0.0384 Epoch 50/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0385 Epoch 51/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0387 Epoch 52/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0378 Epoch 53/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0375 Epoch 54/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0375 Epoch 55/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0381 Epoch 56/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0379 Epoch 57/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0377 Epoch 58/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0369 Epoch 59/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0372 Epoch 60/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0369 Epoch 61/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0363 Epoch 62/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0365 Epoch 63/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0369 Epoch 64/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0368 Epoch 65/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0361 Epoch 66/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0360 Epoch 67/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0357 Epoch 68/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0357 Epoch 69/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0353 Epoch 70/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0365 Epoch 71/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0354 Epoch 72/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0355 Epoch 73/150 273/273 
[==============================] - 1s 4ms/step - loss: 0.0350 Epoch 74/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0352 Epoch 75/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0354 Epoch 76/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0354 Epoch 77/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0350 Epoch 78/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0349 Epoch 79/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0349 Epoch 80/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0350 Epoch 81/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0347 Epoch 82/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0345 Epoch 83/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0347 Epoch 84/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0345 Epoch 85/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0347 Epoch 86/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0348 Epoch 87/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0347 Epoch 88/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0345 Epoch 89/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0343 Epoch 90/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0342 Epoch 91/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0338 Epoch 92/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0342 Epoch 93/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0345 Epoch 94/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0338 Epoch 95/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0341 Epoch 96/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0338 Epoch 97/150 273/273 
[==============================] - 1s 5ms/step - loss: 0.0335 Epoch 98/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0336 Epoch 99/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0339 Epoch 100/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0339 Epoch 101/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0337 Epoch 102/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0342 Epoch 103/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0336 Epoch 104/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0337 Epoch 105/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0334 Epoch 106/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0335 Epoch 107/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0337 Epoch 108/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0332 Epoch 109/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0329 Epoch 110/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0334 Epoch 111/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0332 Epoch 112/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0325 Epoch 113/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0326 Epoch 114/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0331 Epoch 115/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0332 Epoch 116/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0332 Epoch 117/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0329 Epoch 118/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0328 Epoch 119/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0327 Epoch 120/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0328 Epoch 
121/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0326 Epoch 122/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0331 Epoch 123/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0329 Epoch 124/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0331 Epoch 125/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0328 Epoch 126/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0325 Epoch 127/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0328 Epoch 128/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0328 Epoch 129/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0324 Epoch 130/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0324 Epoch 131/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0327 Epoch 132/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0322 Epoch 133/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0326 Epoch 134/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0321 Epoch 135/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0330 Epoch 136/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0324 Epoch 137/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0326 Epoch 138/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0322 Epoch 139/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0320 Epoch 140/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0323 Epoch 141/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0321 Epoch 142/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0320 Epoch 143/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0320 Epoch 144/150 273/273 [==============================] - 1s 4ms/step - 
loss: 0.0326 Epoch 145/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0319 Epoch 146/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0320 Epoch 147/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0321 Epoch 148/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0322 Epoch 149/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0318 Epoch 150/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0319
# Generate predictions (still in scaled space) for the test and training sets.
scaled_preds_test = model.predict(X_test)
scaled_preds_train = model.predict(X_train)
# NOTE(review): custom_metrics_lstm receives unscaled true values together
# with scaled predictions — presumably it inverse-transforms the predictions
# internally; confirm in functions.py.
functions.custom_metrics_lstm(y_test_true, scaled_preds_test, y_train_true, scaled_preds_train)
Testdaten Trainingsdaten
R2 0.96 0.97
MAE 4160.0 3031.0
MSE 30458830.0 21777334.0
RMSE 5519.0 4667.0
MAPE 2.46 % 1.93 %
Die Lernrate wird aus experimentellen Gründen noch weiter heruntergesetzt (auf 0,00005). Zusätzlich wird die Dense-Schicht von 32 auf 64 Neuronen verbreitert und die Anzahl der Epochen von 100 auf 150 erhöht.
# Hyperparameters for this experiment: 150 training epochs, mini-batches of
# 8 samples, and input windows covering 14 consecutive days.
epochs = 150
batch_size = 8
window_size = 14
def model_creation():
    """Assemble and compile the LSTM network for this experiment.

    Architecture: window_size-day windows of 4 features -> LSTM(32, tanh)
    -> Dense(64, tanh) -> Dense(1). Compiled with the custom RMSE loss and
    an Adam optimizer at a 0.00005 learning rate.
    """
    network = Sequential([
        InputLayer(input_shape=(window_size, 4)),
        LSTM(units=32, return_sequences=False, activation="tanh"),
        Dense(units=64, activation="tanh"),
        Dense(units=1),
    ])
    network.compile(loss=root_mean_squared_error, optimizer=Adam(learning_rate=0.00005))
    return network
# Load the unscaled data, used to evaluate against true consumption values.
df_unscaled = pd.read_csv("../3-Data Preparation/data.csv", index_col=0, parse_dates=True)
df_unscaled.index.freq = "D"
# True targets: 2021 onwards is the test year.
y_test_true = df_unscaled["verbrauch"]["2021-01-01":]
# Training targets start after the first window; row 2192 is presumably the
# first day of 2021 (2015-2020 spans 2192 days incl. two leap years) — verify.
y_train_true = df_unscaled["verbrauch"][window_size:2192]
# Load the scaled data used for modelling.
df_scaled = pd.read_csv("../3-Data Preparation/data_scaled.csv", index_col=0, parse_dates=True)
df_scaled.index.freq = "D"
#Aufteilung X (Merkmale) und y (Ziel)
# Split into features X and target y.
# The exogenous columns (workday flag, temperature, daylight hours) are
# shifted one step back so each row of X pairs the current day's consumption
# with the NEXT day's exogenous values; 2015-01-01 therefore drops out.
# (The original comment claimed the consumption column is shifted — it is
# the exogenous columns that move.)
# Explicit .copy() so the shifted columns are written into an independent
# frame rather than a view of df_scaled (avoids SettingWithCopyWarning /
# chained-assignment ambiguity).
X = df_scaled[["verbrauch","arbeitstag","temperatur","tagesstunden"]].copy()
X["arbeitstag"] = X["arbeitstag"].shift(-1)
X["temperatur"] = X["temperatur"].shift(-1)
X["tagesstunden"] = X["tagesstunden"].shift(-1)
X = X[:2556]  # drop the trailing row the shift turned into NaN
y = df_scaled["verbrauch"]
#Aufteilung der Daten in Zeitfenster
def restructure_data(px, py, window_size):
    """Cut px into overlapping windows and pair each with its next target.

    Window i covers px[i : i + window_size]; its target is py[i + window_size],
    i.e. the value immediately after the window. Note that the last window's
    target index equals len(px), so py must hold at least len(px) + 1 values.

    Returns (X_, y_) as numpy arrays.
    """
    starts = range(len(px) - window_size + 1)
    windows = [px[s:s + window_size] for s in starts]
    targets = [py[s + window_size] for s in starts]
    return np.array(windows), np.array(targets)
# Window the data: each sample holds window_size consecutive days of features.
X_windows, y_windows = restructure_data(X, y, window_size)
# Train/test split: the last 365 windows (the year 2021) form the test set;
# 2557 total days minus the 365 test days minus the window_size rows consumed
# by the windowing leaves split_by training windows.
split_by = 2557 - 365 - window_size
X_train = X_windows[:split_by]
y_train = y_windows[:split_by]
X_test = X_windows[split_by:]
y_test = y_windows[split_by:]
# 5-fold cross-validation over the training windows.
# NOTE(review): a shuffled KFold on time-series windows mixes past and future
# samples across folds — consider sklearn's TimeSeriesSplit instead.
kfold = KFold(n_splits=5, shuffle=True, random_state=0)
# DataFrames collecting per-epoch loss curves and per-split error metrics.
df_history = pd.DataFrame()
df_history.index.name = "Epoch"
df_metrics = pd.DataFrame(columns=["Testdaten", "Trainingsdaten", "Validierungsdaten"])
df_metrics.index.name = "Split"
iteration = 1
# Target scaler fitted during data preparation; used to map scaled
# predictions and losses back to the original consumption unit.
scaler_target = joblib.load("../3-Data Preparation/scaler_endog.save")
#Kreuzvalidierung
# Cross-validation: train one freshly initialised model per fold.
for train_index, validation_index in kfold.split(X_train, y_train):
    print(iteration, "-", end="\t")
    # Fresh model so the folds do not share weights.
    model = model_creation()
    # Train on this fold's part, monitoring the held-out part as validation.
    history = model.fit(
        x=X_train[train_index],
        y=y_train[train_index],
        epochs=epochs,
        batch_size=batch_size,
        validation_data=(X_train[validation_index], y_train[validation_index]),
        verbose=0,
    )
    # Record both loss curves, rescaled towards the original consumption unit.
    # NOTE(review): the constant 102469 presumably cancels the offset that the
    # scaler's inverse_transform adds to the (dimensionless) RMSE loss —
    # confirm against the fitted MinMaxScaler.
    train_curve = scaler_target.inverse_transform(np.array(history.history["loss"]).reshape(-1, 1)) - 102469
    validation_curve = scaler_target.inverse_transform(np.array(history.history["val_loss"]).reshape(-1, 1)) - 102469
    df_history[str(iteration) + "_train_loss"] = pd.DataFrame(data=train_curve).squeeze()
    df_history[str(iteration) + "_validation_loss"] = pd.DataFrame(data=validation_curve).squeeze()
    # Predictions (inverse-scaled) for the test, fold-train and fold-validation sets.
    preds_test = scaler_target.inverse_transform(model.predict(X_test).reshape(-1, 1))
    preds_train = scaler_target.inverse_transform(model.predict(X_train[train_index]).reshape(-1, 1))
    preds_validation = scaler_target.inverse_transform(model.predict(X_train[validation_index]).reshape(-1, 1))
    # MAPE in percent for each data set of this split.
    df_metrics.loc[iteration] = [
        round(metrics.mean_absolute_percentage_error(y_test_true, preds_test) * 100, 2),
        round(metrics.mean_absolute_percentage_error(y_train_true[train_index], preds_train) * 100, 2),
        round(metrics.mean_absolute_percentage_error(y_train_true[validation_index], preds_validation) * 100, 2),
    ]
    iteration += 1
#Durchschnittswerte bei History und Metriken berechnen
# Average the per-split loss curves and metrics.
# Derived from the actual per-split columns instead of hard-coding splits
# 1..5, so the code keeps working if KFold's n_splits ever changes.
_train_cols = [c for c in df_history.columns if c.endswith("_train_loss")]
_validation_cols = [c for c in df_history.columns if c.endswith("_validation_loss")]
df_history["train_loss"] = df_history[_train_cols].mean(axis=1)
df_history["validation_loss"] = df_history[_validation_cols].mean(axis=1)
df_metrics.loc["Average"] = [round(df_metrics[col].mean(), 2) for col in df_metrics.columns]
1 - 2 - 3 - 4 - 5 -
# Evaluate the training: print the metric table and plot the learning curves.
# NOTE(review): 20000 is presumably an axis/scale limit for the loss plot —
# confirm against functions.evaluate_training.
functions.evaluate_training(df_metrics, df_history, 20000)
Testdaten Trainingsdaten Validierungsdaten Split 1 2.54 2.25 2.34 2 2.38 2.23 2.20 3 2.42 2.44 2.44 4 2.20 2.15 2.44 5 2.32 2.30 2.16 Average 2.37 2.27 2.32
Der Verlauf der Lernkurve lässt auf eine etwas angemessenere Lernkurve schließen. Allerdings benötigt das Training bei einer derart niedrigen Lernrate sehr viele Epochen, wobei das Risiko für Überanpassungen steigt. Außerdem wird das Training dadurch sehr ressourcenintensiv.
Zusätzlich muss angemerkt werden, dass das Modell nach etwa 150 Epochen bereits langsam mit der Konvergenz beginnt oder diese sich zumindest abzeichnet. Weitere Epochen würden also vermutlich relativ wenig Verbesserungen bringen. Der MAPE wurde beim aktuellen Netz nur sehr geringfügig verbessert. Daher wird ein anderer Ansatz verfolgt, nämlich das Netz durch eine vor der LSTM-Schicht geschaltete Dense-Schicht zu verbessern.
# Build the final model and train it on the full training set.
model = model_creation()
# use_multiprocessing=True was dropped: it only affects Sequence/generator
# input (not NumPy arrays), so it was a no-op here, and the argument was
# removed entirely in Keras 3.
history = model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size)
Epoch 1/150 273/273 [==============================] - 3s 4ms/step - loss: 0.3292 Epoch 2/150 273/273 [==============================] - 1s 4ms/step - loss: 0.1763 Epoch 3/150 273/273 [==============================] - 1s 4ms/step - loss: 0.1555 Epoch 4/150 273/273 [==============================] - 1s 4ms/step - loss: 0.1391 Epoch 5/150 273/273 [==============================] - 1s 4ms/step - loss: 0.1234 Epoch 6/150 273/273 [==============================] - 1s 4ms/step - loss: 0.1093 Epoch 7/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0987 Epoch 8/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0905 Epoch 9/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0834 Epoch 10/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0777 Epoch 11/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0747 Epoch 12/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0725 Epoch 13/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0697 Epoch 14/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0682 Epoch 15/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0671 Epoch 16/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0667 Epoch 17/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0650 Epoch 18/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0638 Epoch 19/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0634 Epoch 20/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0621 Epoch 21/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0615 Epoch 22/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0607 Epoch 23/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0603 Epoch 24/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0586 Epoch 25/150 
273/273 [==============================] - 1s 4ms/step - loss: 0.0585 Epoch 26/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0577 Epoch 27/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0568 Epoch 28/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0564 Epoch 29/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0556 Epoch 30/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0550 Epoch 31/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0548 Epoch 32/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0542 Epoch 33/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0535 Epoch 34/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0530 Epoch 35/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0523 Epoch 36/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0524 Epoch 37/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0512 Epoch 38/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0508 Epoch 39/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0503 Epoch 40/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0495 Epoch 41/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0495 Epoch 42/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0494 Epoch 43/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0482 Epoch 44/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0478 Epoch 45/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0477 Epoch 46/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0474 Epoch 47/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0471 Epoch 48/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0470 Epoch 49/150 273/273 
[==============================] - 1s 4ms/step - loss: 0.0468 Epoch 50/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0466 Epoch 51/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0460 Epoch 52/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0454 Epoch 53/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0457 Epoch 54/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0452 Epoch 55/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0456 Epoch 56/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0446 Epoch 57/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0449 Epoch 58/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0442 Epoch 59/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0444 Epoch 60/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0438 Epoch 61/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0437 Epoch 62/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0436 Epoch 63/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0433 Epoch 64/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0436 Epoch 65/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0435 Epoch 66/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0433 Epoch 67/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0429 Epoch 68/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0429 Epoch 69/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0426 Epoch 70/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0422 Epoch 71/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0422 Epoch 72/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0422 Epoch 73/150 273/273 
[==============================] - 1s 4ms/step - loss: 0.0421 Epoch 74/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0414 Epoch 75/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0417 Epoch 76/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0416 Epoch 77/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0411 Epoch 78/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0416 Epoch 79/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0412 Epoch 80/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0412 Epoch 81/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0408 Epoch 82/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0403 Epoch 83/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0409 Epoch 84/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0406 Epoch 85/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0404 Epoch 86/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0405 Epoch 87/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0404 Epoch 88/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0401 Epoch 89/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0404 Epoch 90/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0403 Epoch 91/150 273/273 [==============================] - 2s 6ms/step - loss: 0.0400 Epoch 92/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0397 Epoch 93/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0392 Epoch 94/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0399 Epoch 95/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0394 Epoch 96/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0390 Epoch 97/150 273/273 
[==============================] - 1s 4ms/step - loss: 0.0393 Epoch 98/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0389 Epoch 99/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0397 Epoch 100/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0393 Epoch 101/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0386 Epoch 102/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0384 Epoch 103/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0392 Epoch 104/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0393 Epoch 105/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0390 Epoch 106/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0385 Epoch 107/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0383 Epoch 108/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0392 Epoch 109/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0385 Epoch 110/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0379 Epoch 111/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0381 Epoch 112/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0383 Epoch 113/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0378 Epoch 114/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0382 Epoch 115/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0376 Epoch 116/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0378 Epoch 117/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0375 Epoch 118/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0377 Epoch 119/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0379 Epoch 120/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0370 Epoch 
121/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0373 Epoch 122/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0376 Epoch 123/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0370 Epoch 124/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0372 Epoch 125/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0375 Epoch 126/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0374 Epoch 127/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0368 Epoch 128/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0366 Epoch 129/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0366 Epoch 130/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0369 Epoch 131/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0368 Epoch 132/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0371 Epoch 133/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0367 Epoch 134/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0371 Epoch 135/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0362 Epoch 136/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0367 Epoch 137/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0369 Epoch 138/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0369 Epoch 139/150 273/273 [==============================] - 2s 6ms/step - loss: 0.0365 Epoch 140/150 273/273 [==============================] - 2s 6ms/step - loss: 0.0363 Epoch 141/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0365 Epoch 142/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0357 Epoch 143/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0363 Epoch 144/150 273/273 [==============================] - 1s 5ms/step - 
loss: 0.0364 Epoch 145/150 273/273 [==============================] - 1s 5ms/step - loss: 0.0365 Epoch 146/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0357 Epoch 147/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0358 Epoch 148/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0355 Epoch 149/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0363 Epoch 150/150 273/273 [==============================] - 1s 4ms/step - loss: 0.0363
## Vorhersagen erzeugen
#Generate predictions (still on the scaled value range)
scaled_preds_test = model.predict(X_test)
scaled_preds_train = model.predict(X_train)
#Report R2/MAE/MSE/RMSE/MAPE; custom_metrics_lstm presumably inverse-scales the predictions - confirm in functions.py
functions.custom_metrics_lstm(y_test_true, scaled_preds_test, y_train_true, scaled_preds_train)
Testdaten Trainingsdaten
R2 0.96 0.96
MAE 3861.0 3314.0
MSE 30114644.0 26857096.0
RMSE 5488.0 5182.0
MAPE 2.3 % 2.12 %
Da auch die Fensterlänge optimiert werden soll, wird im Folgenden ein von 14 Tagen verkürztes Fenster mit nur 7 Tagen getestet.
#Hyperparameters for the shortened window experiment (7 instead of 14 days)
epochs = 150
batch_size = 8
window_size = 7
def model_creation():
    """Build and compile the LSTM-Dense forecasting network.

    Architecture: input of shape (window_size, 4) -> LSTM(32, tanh)
    -> Dense(64, tanh) -> Dense(1). Compiled with the custom RMSE loss
    and the Adam optimizer (learning rate 0.0005).

    Returns:
        A compiled tensorflow.keras Sequential model.
    """
    net = Sequential([
        InputLayer(input_shape=(window_size, 4)),
        LSTM(units=32, return_sequences=False, activation="tanh"),
        Dense(units=64, activation="tanh"),
        Dense(units=1),
    ])
    net.compile(loss=root_mean_squared_error,
                optimizer=Adam(learning_rate=0.0005))
    return net
#Load unscaled data for the analyses (daily frequency index)
df_unscaled = pd.read_csv("../3-Data Preparation/data.csv", index_col=0, parse_dates=True)
df_unscaled.index.freq = "D"
#Ground-truth consumption: year 2021 for the test set, the window-aligned rest for training
y_test_true = df_unscaled["verbrauch"]["2021-01-01":]
y_train_true = df_unscaled["verbrauch"][window_size:2192]
#Load scaled data for the modelling
df_scaled = pd.read_csv("../3-Data Preparation/data_scaled.csv", index_col=0, parse_dates=True)
df_scaled.index.freq = "D"
#Split into X (features) and y (target)
X = df_scaled[["verbrauch","arbeitstag","temperatur","tagesstunden"]]
#The exogenous features are shifted forward by one step, pairing each day's consumption with the next day's calendar/weather values (effectively dropping 2015-01-01 from the targets)
X["arbeitstag"] = X["arbeitstag"].shift(-1)
X["temperatur"] = X["temperatur"].shift(-1)
X["tagesstunden"] = X["tagesstunden"].shift(-1)
#Drop the last row, whose shifted features are NaN after shift(-1)
X = X[:2556]
y = df_scaled["verbrauch"]
#Split the data into sliding time windows
def restructure_data(px, py, window_size):
    """Turn a feature table and a target series into windowed samples.

    Sample i is the feature window px[i : i + window_size] paired with
    the target py[i + window_size], i.e. the value directly following
    the window.

    NOTE(review): the last sample reads py[len(px)], so py is expected
    to hold one more observation than px - confirm against the caller.

    Returns:
        Tuple (X_, y_) of numpy arrays with len(px) - window_size + 1
        samples each.
    """
    n_samples = len(px) - window_size + 1
    windows = [px[pos:pos + window_size] for pos in range(n_samples)]
    targets = [py[pos + window_size] for pos in range(n_samples)]
    return np.array(windows), np.array(targets)
#Restructure the data into overlapping windows
X_windows, y_windows = restructure_data(X, y, window_size)
#Split into training and test data (the last 365 windows, i.e. the year 2021, form the test set)
split_by = 2557 - 365 - window_size
X_train = X_windows[:split_by]
y_train = y_windows[:split_by]
X_test = X_windows[split_by:]
y_test = y_windows[split_by:]
#Create the cross-validation splitter
#NOTE(review): shuffle=True mixes time windows across folds - each window is treated as a self-contained sample here
kfold = KFold(n_splits=5, shuffle=True, random_state=0)
#DataFrames collecting the training history and the per-split metrics
df_history = pd.DataFrame()
df_history.index.name = "Epoch"
df_metrics = pd.DataFrame(columns=["Testdaten", "Trainingsdaten", "Validierungsdaten"])
df_metrics.index.name = "Split"
iteration = 1
#Scaler fitted on the target during data preparation; used to map predictions back to the original unit
scaler_target = joblib.load("../3-Data Preparation/scaler_endog.save")
#Cross-validation over the five folds
for train_index, validation_index in kfold.split(X_train, y_train):
#Progress indicator for the current split
print(iteration, "-", end="\t")
#Create a fresh model for this fold
model = model_creation()
#Train on the fold's training part, validating on the held-out part
history = model.fit(x=X_train[train_index], y=y_train[train_index], epochs=epochs, batch_size=batch_size, validation_data=(X_train[validation_index], y_train[validation_index]), verbose=0)
#Store the loss curves; NOTE(review): inverse_transform followed by "- 102469" presumably converts the scaled RMSE into the original unit (102469 looks like the scaler's minimum) - confirm
df_history[str(iteration) + "_train_loss"] = pd.DataFrame(data=(scaler_target.inverse_transform(np.array(history.history["loss"]).reshape(-1, 1)) - 102469)).squeeze()
df_history[str(iteration) + "_validation_loss"] = pd.DataFrame(data=(scaler_target.inverse_transform(np.array(history.history["val_loss"]).reshape(-1, 1)) - 102469)).squeeze()
#Predictions for test, training and validation data, rescaled to the original unit
preds_test = scaler_target.inverse_transform(model.predict(X_test).reshape(-1, 1))
preds_train = scaler_target.inverse_transform(model.predict(X_train[train_index]).reshape(-1, 1))
preds_validation = scaler_target.inverse_transform(model.predict(X_train[validation_index]).reshape(-1, 1))
#MAPE (in percent) per data set for this split
df_metrics.loc[iteration] = [round(metrics.mean_absolute_percentage_error(y_test_true, preds_test) * 100, 2),
round(metrics.mean_absolute_percentage_error(y_train_true[train_index], preds_train) * 100, 2),
round(metrics.mean_absolute_percentage_error(y_train_true[validation_index], preds_validation) * 100, 2)]
iteration = iteration + 1
#Average history and metrics over the five cross-validation splits
#Per-epoch mean loss curves (train and validation) across splits 1-5
df_history["train_loss"] = (df_history["1_train_loss"] + df_history["2_train_loss"] + df_history["3_train_loss"] + df_history["4_train_loss"] + df_history["5_train_loss"]) / 5
df_history["validation_loss"] = (df_history["1_validation_loss"] + df_history["2_validation_loss"] + df_history["3_validation_loss"] + df_history["4_validation_loss"] + df_history["5_validation_loss"]) / 5
#Append an "Average" row holding the mean MAPE per data set
df_metrics.loc["Average"] = [round(df_metrics["Testdaten"].mean(), 2),
round(df_metrics["Trainingsdaten"].mean(), 2),
round(df_metrics["Validierungsdaten"].mean(), 2)]
1 - 2 - 3 - 4 - 5 -
#Evaluate the training run (metrics table and learning curve)
#NOTE(review): 25000 is presumably the loss-axis limit for the plot - confirm against functions.evaluate_training
functions.evaluate_training(df_metrics, df_history, 25000)
Testdaten Trainingsdaten Validierungsdaten Split 1 2.05 1.78 1.84 2 2.38 1.63 1.69 3 2.45 1.82 1.98 4 2.25 1.57 1.85 5 2.33 1.66 1.76 Average 2.29 1.69 1.82
Der MAPE verändert sich durch das kürzere Fenster geringfügig während der Kreuzvalidierungen, beim finalen Test zeigt sich aber im Vergleich zu LSTM-DENSE-4 keine wirkliche Veränderung.
#Create the final model (same architecture as used in the cross-validation)
model = model_creation()
#Train the final model on the complete training set (no validation split here)
history = model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size, use_multiprocessing=True)
Epoch 1/150 274/274 [==============================] - 3s 3ms/step - loss: 0.1070 Epoch 2/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0587 Epoch 3/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0543 Epoch 4/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0523 Epoch 5/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0494 Epoch 6/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0462 Epoch 7/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0448 Epoch 8/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0434 Epoch 9/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0405 Epoch 10/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0410 Epoch 11/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0382 Epoch 12/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0379 Epoch 13/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0398 Epoch 14/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0378 Epoch 15/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0382 Epoch 16/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0379 Epoch 17/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0366 Epoch 18/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0371 Epoch 19/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0368 Epoch 20/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0370 Epoch 21/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0363 Epoch 22/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0364 Epoch 23/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0363 Epoch 24/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0357 Epoch 25/150 
274/274 [==============================] - 1s 3ms/step - loss: 0.0362 Epoch 26/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0356 Epoch 27/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0353 Epoch 28/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0348 Epoch 29/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0352 Epoch 30/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0353 Epoch 31/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0345 Epoch 32/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0349 Epoch 33/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0345 Epoch 34/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0346 Epoch 35/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0341 Epoch 36/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0345 Epoch 37/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0343 Epoch 38/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0333 Epoch 39/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0336 Epoch 40/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0337 Epoch 41/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0341 Epoch 42/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0330 Epoch 43/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0332 Epoch 44/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0337 Epoch 45/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0333 Epoch 46/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0328 Epoch 47/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0326 Epoch 48/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0323 Epoch 49/150 274/274 
[==============================] - 1s 3ms/step - loss: 0.0326 Epoch 50/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0328 Epoch 51/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0331 Epoch 52/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0325 Epoch 53/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0324 Epoch 54/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0326 Epoch 55/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0324 Epoch 56/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0320 Epoch 57/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0324 Epoch 58/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0323 Epoch 59/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0322 Epoch 60/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0321 Epoch 61/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0319 Epoch 62/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0317 Epoch 63/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0321 Epoch 64/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0322 Epoch 65/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0318 Epoch 66/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0322 Epoch 67/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0318 Epoch 68/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0316 Epoch 69/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0315 Epoch 70/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0309 Epoch 71/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0311 Epoch 72/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0313 Epoch 73/150 274/274 
[==============================] - 1s 3ms/step - loss: 0.0317 Epoch 74/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0310 Epoch 75/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0313 Epoch 76/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0305 Epoch 77/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0312 Epoch 78/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0305 Epoch 79/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0307 Epoch 80/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0317 Epoch 81/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0308 Epoch 82/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0307 Epoch 83/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0305 Epoch 84/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0298 Epoch 85/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0298 Epoch 86/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0296 Epoch 87/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0302 Epoch 88/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0300 Epoch 89/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0307 Epoch 90/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0301 Epoch 91/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0303 Epoch 92/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0302 Epoch 93/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0294 Epoch 94/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0294 Epoch 95/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0301 Epoch 96/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0296 Epoch 97/150 274/274 
[==============================] - 1s 3ms/step - loss: 0.0297 Epoch 98/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0292 Epoch 99/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0297 Epoch 100/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0293 Epoch 101/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0289 Epoch 102/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0295A: 0s - loss: 0.0 Epoch 103/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0290 Epoch 104/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0293 Epoch 105/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0292 Epoch 106/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0290 Epoch 107/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0290 Epoch 108/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0287 Epoch 109/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0291 Epoch 110/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0289 Epoch 111/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0290 Epoch 112/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0282 Epoch 113/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0290 Epoch 114/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0282 Epoch 115/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0284 Epoch 116/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0288 Epoch 117/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0282 Epoch 118/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0286 Epoch 119/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0282 Epoch 120/150 274/274 [==============================] - 1s 3ms/step - 
loss: 0.0287 Epoch 121/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0285 Epoch 122/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0283 Epoch 123/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0281 Epoch 124/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0279 Epoch 125/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0280 Epoch 126/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0280 Epoch 127/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0282 Epoch 128/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0281 Epoch 129/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0284 Epoch 130/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0282 Epoch 131/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0283 Epoch 132/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0275 Epoch 133/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0281 Epoch 134/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0280 Epoch 135/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0273 Epoch 136/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0273 Epoch 137/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0276 Epoch 138/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0276 Epoch 139/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0278 Epoch 140/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0276 Epoch 141/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0276 Epoch 142/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0275 Epoch 143/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0273 Epoch 144/150 274/274 [==============================] 
- 1s 4ms/step - loss: 0.0275 Epoch 145/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0273 Epoch 146/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0272 Epoch 147/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0270 Epoch 148/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0274A: 0s - loss: 0.0 Epoch 149/150 274/274 [==============================] - 1s 3ms/step - loss: 0.0273 Epoch 150/150 274/274 [==============================] - 1s 4ms/step - loss: 0.0272
#Generate predictions (still on the scaled value range)
scaled_preds_test = model.predict(X_test)
scaled_preds_train = model.predict(X_train)
#Report R2/MAE/MSE/RMSE/MAPE; custom_metrics_lstm presumably inverse-scales the predictions - confirm in functions.py
functions.custom_metrics_lstm(y_test_true, scaled_preds_test, y_train_true, scaled_preds_train)
Testdaten Trainingsdaten
R2 0.96 0.98
MAE 3961.0 2624.0
MSE 25919665.0 14660269.0
RMSE 5091.0 3829.0
MAPE 2.38 % 1.63 %
Es wird ebenfalls getestet, wie sich eine Verlängerung des Fensters von 14 auf 21 Beobachtungen auswirkt.
#Hyperparameters for the extended window experiment (21 instead of 14 days)
epochs = 150
batch_size = 8
window_size = 21
def model_creation():
    """Build and compile the LSTM-Dense forecasting network.

    Architecture: input of shape (window_size, 4) -> LSTM(32, tanh)
    -> Dense(64, tanh) -> Dense(1). Compiled with the custom RMSE loss
    and the Adam optimizer (learning rate 0.0005).

    Returns:
        A compiled tensorflow.keras Sequential model.
    """
    net = Sequential([
        InputLayer(input_shape=(window_size, 4)),
        LSTM(units=32, return_sequences=False, activation="tanh"),
        Dense(units=64, activation="tanh"),
        Dense(units=1),
    ])
    net.compile(loss=root_mean_squared_error,
                optimizer=Adam(learning_rate=0.0005))
    return net
#Load unscaled data for the analyses (daily frequency index)
df_unscaled = pd.read_csv("../3-Data Preparation/data.csv", index_col=0, parse_dates=True)
df_unscaled.index.freq = "D"
#Ground-truth consumption: year 2021 for the test set, the window-aligned rest for training
y_test_true = df_unscaled["verbrauch"]["2021-01-01":]
y_train_true = df_unscaled["verbrauch"][window_size:2192]
#Load scaled data for the modelling
df_scaled = pd.read_csv("../3-Data Preparation/data_scaled.csv", index_col=0, parse_dates=True)
df_scaled.index.freq = "D"
#Split into X (features) and y (target)
X = df_scaled[["verbrauch","arbeitstag","temperatur","tagesstunden"]]
#The exogenous features are shifted forward by one step, pairing each day's consumption with the next day's calendar/weather values (effectively dropping 2015-01-01 from the targets)
X["arbeitstag"] = X["arbeitstag"].shift(-1)
X["temperatur"] = X["temperatur"].shift(-1)
X["tagesstunden"] = X["tagesstunden"].shift(-1)
#Drop the last row, whose shifted features are NaN after shift(-1)
X = X[:2556]
y = df_scaled["verbrauch"]
#Split the data into sliding time windows
def restructure_data(px, py, window_size):
    """Restructure features and target into sliding windows for the LSTM.

    Parameters
    ----------
    px : array-like, shape (n, n_features)
        Feature rows; any 2-D structure numpy can convert (DataFrame, ndarray).
    py : array-like, shape (>= n + 1,)
        Target series. Must extend at least one step beyond ``px``, because the
        window of rows [i, i + window_size) is paired with target i + window_size.
    window_size : int
        Number of consecutive observations per sample.

    Returns
    -------
    X_ : np.ndarray, shape (n - window_size + 1, window_size, n_features)
    y_ : np.ndarray, shape (n - window_size + 1,)
    """
    # Convert up front so all indexing is positional. The original indexed the
    # pandas Series with integer keys (py[idx + window_size]), which relies on
    # the deprecated positional fallback for non-integer indexes and fails on
    # modern pandas; ndarray indexing is unambiguous and also accepts plain arrays.
    px_arr = np.asarray(px)
    py_arr = np.asarray(py)
    n_windows = len(px_arr) - window_size + 1
    X_ = np.array([px_arr[i:i + window_size] for i in range(n_windows)])
    y_ = np.array([py_arr[i + window_size] for i in range(n_windows)])
    return X_, y_
X_windows, y_windows = restructure_data(X, y, window_size)
# Split into training and test data; the last 365 windows form the test year
split_by = 2557 - 365 - window_size
X_train = X_windows[:split_by]
y_train = y_windows[:split_by]
X_test = X_windows[split_by:]
y_test = y_windows[split_by:]
# Create the cross-validation splitter (5 shuffled folds, fixed seed for reproducibility)
kfold = KFold(n_splits=5, shuffle=True, random_state=0)
# DataFrames collecting per-fold training history and metrics
df_history = pd.DataFrame()
df_history.index.name = "Epoch"
df_metrics = pd.DataFrame(columns=["Testdaten", "Trainingsdaten", "Validierungsdaten"])
df_metrics.index.name = "Split"
iteration = 1
# Scaler fitted on the endogenous target in data preparation; used to invert predictions
scaler_target = joblib.load("../3-Data Preparation/scaler_endog.save")
# Cross-validation: train a fresh model per fold, record losses and MAPE
for train_index, validation_index in kfold.split(X_train, y_train):
print(iteration, "-", end="\t")
# Build a fresh model for this fold
model = model_creation()
# Train on the fold's training portion, validate on its held-out portion
history = model.fit(x=X_train[train_index], y=y_train[train_index], epochs=epochs, batch_size=batch_size, validation_data=(X_train[validation_index], y_train[validation_index]), verbose=0)
# Store results: losses are inverse-transformed to original units; the 102469
# offset presumably removes the scaler's additive shift -- TODO confirm against scaler params
df_history[str(iteration) + "_train_loss"] = pd.DataFrame(data=(scaler_target.inverse_transform(np.array(history.history["loss"]).reshape(-1, 1)) - 102469)).squeeze()
df_history[str(iteration) + "_validation_loss"] = pd.DataFrame(data=(scaler_target.inverse_transform(np.array(history.history["val_loss"]).reshape(-1, 1)) - 102469)).squeeze()
# Predictions for test, training and validation data (back in original units)
preds_test = scaler_target.inverse_transform(model.predict(X_test).reshape(-1, 1))
preds_train = scaler_target.inverse_transform(model.predict(X_train[train_index]).reshape(-1, 1))
preds_validation = scaler_target.inverse_transform(model.predict(X_train[validation_index]).reshape(-1, 1))
# MAPE (in percent, rounded) per data partition for this fold
df_metrics.loc[iteration] = [round(metrics.mean_absolute_percentage_error(y_test_true, preds_test) * 100, 2),
round(metrics.mean_absolute_percentage_error(y_train_true[train_index], preds_train) * 100, 2),
round(metrics.mean_absolute_percentage_error(y_train_true[validation_index], preds_validation) * 100, 2)]
iteration = iteration + 1
# Average the per-fold learning curves and metrics across the five folds
fold_ids = range(1, 6)
df_history["train_loss"] = sum(df_history[f"{i}_train_loss"] for i in fold_ids) / 5
df_history["validation_loss"] = sum(df_history[f"{i}_validation_loss"] for i in fold_ids) / 5
df_metrics.loc["Average"] = [
    round(df_metrics[col].mean(), 2)
    for col in ("Testdaten", "Trainingsdaten", "Validierungsdaten")
]
1 - 2 - 3 - 4 - 5 -
# Evaluate the training run (metrics table and learning curve);
# 25000 is presumably a plot-axis limit for the loss curve -- TODO confirm in functions.py
functions.evaluate_training(df_metrics, df_history, 25000)
Testdaten Trainingsdaten Validierungsdaten Split 1 2.47 1.58 1.63 2 2.75 1.67 1.65 3 2.93 1.86 2.08 4 3.19 1.91 2.22 5 2.19 1.72 2.03 Average 2.71 1.75 1.92
Der MAPE steigt durch das verlängerte Fenster im Vergleich zu DENSE-LSTM-4 ein wenig an, außerdem sind leichte Überanpassungen zu erkennen. Ein längeres Fenster bringt also keine Verbesserungen.
# Build the final model (same architecture as in cross-validation)
model = model_creation()
# Train the final model on the full training set
# NOTE(review): use_multiprocessing only affects generator/Sequence inputs;
# with ndarray inputs as here it should be a no-op -- confirm before relying on it
history = model.fit(x=X_train, y=y_train, epochs=epochs, batch_size=batch_size, use_multiprocessing=True)
Epoch 1/150 272/272 [==============================] - 3s 7ms/step - loss: 0.1323 Epoch 2/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0630 Epoch 3/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0559 Epoch 4/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0531 Epoch 5/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0499 Epoch 6/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0494 Epoch 7/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0480 Epoch 8/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0439 Epoch 9/150 272/272 [==============================] - 2s 8ms/step - loss: 0.0423 Epoch 10/150 272/272 [==============================] - 2s 9ms/step - loss: 0.0412 Epoch 11/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0398 Epoch 12/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0399 Epoch 13/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0394 Epoch 14/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0392 Epoch 15/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0386 Epoch 16/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0378 Epoch 17/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0373 Epoch 18/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0372 Epoch 19/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0362 Epoch 20/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0359 Epoch 21/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0359 Epoch 22/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0350 Epoch 23/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0362 Epoch 24/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0354 Epoch 25/150 
272/272 [==============================] - 2s 6ms/step - loss: 0.0345 Epoch 26/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0340 Epoch 27/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0345 Epoch 28/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0346 Epoch 29/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0339 Epoch 30/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0341 Epoch 31/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0339 Epoch 32/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0340 Epoch 33/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0337 Epoch 34/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0337 Epoch 35/150 272/272 [==============================] - 2s 8ms/step - loss: 0.0326 Epoch 36/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0331 Epoch 37/150 272/272 [==============================] - 2s 8ms/step - loss: 0.0332 Epoch 38/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0332 Epoch 39/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0338 Epoch 40/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0326 Epoch 41/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0329 Epoch 42/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0327A: 0s - Epoch 43/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0331 Epoch 44/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0331 Epoch 45/150 272/272 [==============================] - 2s 8ms/step - loss: 0.0324 Epoch 46/150 272/272 [==============================] - 2s 9ms/step - loss: 0.0321 Epoch 47/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0323 Epoch 48/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0322 Epoch 49/150 
272/272 [==============================] - 2s 7ms/step - loss: 0.0322 Epoch 50/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0321 Epoch 51/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0318 Epoch 52/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0323 Epoch 53/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0320 Epoch 54/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0320 Epoch 55/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0317 Epoch 56/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0309 Epoch 57/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0315 Epoch 58/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0313 Epoch 59/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0307 Epoch 60/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0308 Epoch 61/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0312 Epoch 62/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0309 Epoch 63/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0318 Epoch 64/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0305 Epoch 65/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0302 Epoch 66/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0306 Epoch 67/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0317 Epoch 68/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0312 Epoch 69/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0302 Epoch 70/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0301 Epoch 71/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0304 Epoch 72/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0301 Epoch 73/150 272/272 
[==============================] - 2s 6ms/step - loss: 0.0308 Epoch 74/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0304 Epoch 75/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0302 Epoch 76/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0298 Epoch 77/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0295 Epoch 78/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0297 Epoch 79/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0299 Epoch 80/150 272/272 [==============================] - 1s 5ms/step - loss: 0.0298 Epoch 81/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0296 Epoch 82/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0291 Epoch 83/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0295 Epoch 84/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0290 Epoch 85/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0291 Epoch 86/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0293 Epoch 87/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0295 Epoch 88/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0296 Epoch 89/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0293 Epoch 90/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0291 Epoch 91/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0296 Epoch 92/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0286 Epoch 93/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0289 Epoch 94/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0290 Epoch 95/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0289 Epoch 96/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0289 Epoch 97/150 272/272 
[==============================] - 2s 6ms/step - loss: 0.0285 Epoch 98/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0281 Epoch 99/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0281 Epoch 100/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0286 Epoch 101/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0285 Epoch 102/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0282 Epoch 103/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0285 Epoch 104/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0280 Epoch 105/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0279 Epoch 106/150 272/272 [==============================] - 2s 8ms/step - loss: 0.0279 Epoch 107/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0279 Epoch 108/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0281 Epoch 109/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0281 Epoch 110/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0280 Epoch 111/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0278 Epoch 112/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0279 Epoch 113/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0282 Epoch 114/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0271 Epoch 115/150 272/272 [==============================] - 2s 8ms/step - loss: 0.0269 Epoch 116/150 272/272 [==============================] - 2s 8ms/step - loss: 0.0277 Epoch 117/150 272/272 [==============================] - 2s 8ms/step - loss: 0.0276 Epoch 118/150 272/272 [==============================] - 2s 8ms/step - loss: 0.0269 Epoch 119/150 272/272 [==============================] - 2s 8ms/step - loss: 0.0275 Epoch 120/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0272 Epoch 
121/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0268 Epoch 122/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0263 Epoch 123/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0263 Epoch 124/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0263 Epoch 125/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0267 Epoch 126/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0264 Epoch 127/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0262 Epoch 128/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0262 Epoch 129/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0263 Epoch 130/150 272/272 [==============================] - 2s 8ms/step - loss: 0.0263 Epoch 131/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0267 Epoch 132/150 272/272 [==============================] - 2s 6ms/step - loss: 0.0262 Epoch 133/150 272/272 [==============================] - 2s 8ms/step - loss: 0.0262 Epoch 134/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0260 Epoch 135/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0262 Epoch 136/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0257 Epoch 137/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0259 Epoch 138/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0257 Epoch 139/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0266 Epoch 140/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0259 Epoch 141/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0260 Epoch 142/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0259 Epoch 143/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0256 Epoch 144/150 272/272 [==============================] - 2s 9ms/step - 
loss: 0.0257 Epoch 145/150 272/272 [==============================] - 2s 8ms/step - loss: 0.0260 Epoch 146/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0256 Epoch 147/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0252 Epoch 148/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0253 Epoch 149/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0250 Epoch 150/150 272/272 [==============================] - 2s 7ms/step - loss: 0.0249
# Generate predictions with the final model (scaled space; the metrics helper
# presumably inverts the scaling internally -- TODO confirm in functions.py)
scaled_preds_test = model.predict(X_test)
scaled_preds_train = model.predict(X_train)
# Report R2 / MAE / MSE / RMSE / MAPE for test and training data
functions.custom_metrics_lstm(y_test_true, scaled_preds_test, y_train_true, scaled_preds_train)
Testdaten Trainingsdaten
R2 0.96 0.98
MAE 4078.0 2458.0
MSE 29155808.0 12671160.0
RMSE 5400.0 3560.0
MAPE 2.44 % 1.53 %